From c791e5216ad58b285d4a3090f0d8482b8289b7cd Mon Sep 17 00:00:00 2001 From: "maf46@burn.cl.cam.ac.uk" Date: Tue, 22 Feb 2005 14:20:36 +0000 Subject: [PATCH] bitkeeper revision 1.1234 (421b3fb4-44X_GoE5Kjk9WGfmRE_rg) Move the phys-to-mach map from PERDOMAIN_VIRT_START to RO_MPT_VIRT_START (which isn't otherwise used by shadow_mode_shadow()==true domains). Signed-off-by: michael.fetterman@cl.cam.ac.uk --- xen/arch/x86/domain.c | 11 ++++------- xen/arch/x86/x86_32/mm.c | 8 ++++++-- xen/include/asm-x86/mm.h | 16 +++++++++------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index cb9643a262..1bffcb6eb3 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -299,7 +299,7 @@ void arch_vmx_do_launch(struct exec_domain *ed) static void alloc_monitor_pagetable(struct exec_domain *ed) { unsigned long mmfn; - l2_pgentry_t *mpl2e, *phys_table; + l2_pgentry_t *mpl2e; struct pfn_info *mmfn_info; struct domain *d = ed->domain; @@ -323,12 +323,9 @@ static void alloc_monitor_pagetable(struct exec_domain *ed) ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT); ed->arch.monitor_vtable = mpl2e; - phys_table = (l2_pgentry_t *) - map_domain_mem(pagetable_val(ed->arch.phys_table)); - memcpy(d->arch.mm_perdomain_pt, phys_table, - L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t)); - - unmap_domain_mem(phys_table); + // map the phys_to_machine map into the Read-Only MPT space for this domain + mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = + mk_l2_pgentry(pagetable_val(ed->arch.phys_table) | __PAGE_HYPERVISOR); } /* diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c index 788e7c1241..0ba7feff7f 100644 --- a/xen/arch/x86/x86_32/mm.c +++ b/xen/arch/x86/x86_32/mm.c @@ -119,10 +119,14 @@ void __init paging_init(void) idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)] = mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR); - /* Create read-only mapping of MPT for guest-OS use. 
*/ + /* Create read-only mapping of MPT for guest-OS use. + * NB. Remove the global bit so that shadow_mode_translate()==true domains + * can reuse this address space for their phys-to-machine mapping. + */ idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)] = mk_l2_pgentry(l2_pgentry_val( - idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]) & ~_PAGE_RW); + idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]) & + ~(_PAGE_RW | _PAGE_GLOBAL)); /* Set up mapping cache for domain pages. */ mapcache = (unsigned long *)alloc_xenheap_page(); diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h index 1d0cf33c24..3e4b1d4b0b 100644 --- a/xen/include/asm-x86/mm.h +++ b/xen/include/asm-x86/mm.h @@ -235,9 +235,11 @@ void synchronise_pagetables(unsigned long cpu_mask); /* * The phys_to_machine_mapping is the reversed mapping of MPT for full - * virtualization. + * virtualization. It is only used by shadow_mode_translate()==true + * guests, so we steal the address space that would have normally + * been used by the read-only MPT map. */ -#define __phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START) +#define __phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START) /* Returns the machine physical */ static inline unsigned long phys_to_machine_mapping(unsigned long pfn) @@ -245,11 +247,11 @@ static inline unsigned long phys_to_machine_mapping(unsigned long pfn) unsigned long mfn; l1_pgentry_t pte; - if (__get_user(l1_pgentry_val(pte), (__phys_to_machine_mapping + pfn))) { - return 0; - } - - mfn = l1_pgentry_to_phys(pte) >> PAGE_SHIFT; + if (__get_user(l1_pgentry_val(pte), (__phys_to_machine_mapping + pfn))) + mfn = 0; + else + mfn = l1_pgentry_to_phys(pte) >> PAGE_SHIFT; + return mfn; } #define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn) -- 2.30.2